#endif /* defined(__x86_64__) */
+/*
+ * Record the new guest CR3 value in the vcpu's cached hw_cr3 and ask the
+ * vendor-specific (VMX/SVM) layer to propagate it into hardware state.
+ */
+void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3)
+{
+ v->arch.hvm_vcpu.hw_cr3 = guest_cr3;
+ hvm_funcs.update_guest_cr3(v);
+}
+
/* Initialise a hypercall transfer page for a VMX domain using
paravirtualised drivers. */
void hvm_hypercall_page_initialise(struct domain *d,
/* SVM doesn't have a HOST_CR3 equivalent to update. */
}
+/* SVM hook: copy the cached hw_cr3 into the VMCB's CR3 field. */
+void svm_update_guest_cr3(struct vcpu *v)
+{
+ v->arch.hvm_svm.vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3;
+}
+
unsigned long svm_get_ctrl_reg(struct vcpu *v, unsigned int num)
{
switch ( num )
hvm_funcs.get_segment_register = svm_get_segment_register;
hvm_funcs.update_host_cr3 = svm_update_host_cr3;
+ hvm_funcs.update_guest_cr3 = svm_update_guest_cr3;
hvm_funcs.stts = svm_stts;
hvm_funcs.set_tsc_offset = svm_set_tsc_offset;
HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
(unsigned long) (mfn << PAGE_SHIFT));
- vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3;
set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
}
return 0;
}
shadow_update_paging_modes(v);
- vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3;
set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
}
else if ( (value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PE )
}
/* we should take care of this kind of situation */
shadow_update_paging_modes(v);
- vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3;
set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
}
v->arch.hvm_svm.cpu_cr3 = value;
update_cr3(v);
- vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3;
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
}
break;
HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
(unsigned long) (mfn << PAGE_SHIFT));
- vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3;
-
HVM_DBG_LOG(DBG_LEVEL_VMMU,
"Update CR3 value = %lx, mfn = %lx",
v->arch.hvm_svm.cpu_cr3, mfn);
__vmwrite(GUEST_TR_BASE, 0);
__vmwrite(GUEST_TR_LIMIT, 0xff);
- shadow_update_paging_modes(v);
- __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
- __vmwrite(HOST_CR3, v->arch.cr3);
-
vmx_vmcs_exit(v);
+
+ shadow_update_paging_modes(v); /* will update HOST & GUEST_CR3 as required */
}
int vmx_create_vmcs(struct vcpu *v)
return (vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE));
}
-/* Works only for vcpu == current */
+/*
+ * Write v->arch.cr3 into the VMCS HOST_CR3 field.  No longer restricted to
+ * v == current: vmx_vmcs_enter/exit let us safely update a remote vcpu's
+ * VMCS, provided that vcpu is not runnable (see the ASSERT below).
+ */
static void vmx_update_host_cr3(struct vcpu *v)
{
- ASSERT(v == current);
+ ASSERT( (v == current) || !vcpu_runnable(v) );
+ vmx_vmcs_enter(v);
__vmwrite(HOST_CR3, v->arch.cr3);
+ vmx_vmcs_exit(v);
}
+/*
+ * VMX hook: write the cached hw_cr3 into the VMCS GUEST_CR3 field.
+ * Usable for a remote vcpu (via vmx_vmcs_enter/exit) as long as that
+ * vcpu is not runnable, mirroring vmx_update_host_cr3 above.
+ */
+static void vmx_update_guest_cr3(struct vcpu *v)
+{
+ ASSERT( (v == current) || !vcpu_runnable(v) );
+ vmx_vmcs_enter(v);
+ __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
+ vmx_vmcs_exit(v);
+}
+
+
static void vmx_inject_exception(
unsigned int trapnr, int errcode, unsigned long cr2)
{
hvm_funcs.get_segment_register = vmx_get_segment_register;
hvm_funcs.update_host_cr3 = vmx_update_host_cr3;
+ hvm_funcs.update_guest_cr3 = vmx_update_guest_cr3;
hvm_funcs.stts = vmx_stts;
hvm_funcs.set_tsc_offset = vmx_set_tsc_offset;
__vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes.bytes);
shadow_update_paging_modes(v);
- __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
return 0;
bad_cr3:
HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
(unsigned long) (mfn << PAGE_SHIFT));
- __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
/*
* arch->shadow_table should hold the next CR3 for shadow
*/
__vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
}
shadow_update_paging_modes(v);
- __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
}
return 1;
*/
v->arch.hvm_vmx.cpu_cr3 = value;
update_cr3(v);
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx",
- value);
- __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
}
break;
HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
(unsigned long) (mfn << PAGE_SHIFT));
- __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
-
/*
* arch->shadow_table should hold the next CR3 for shadow
*/
-
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
+ HVM_DBG_LOG(DBG_LEVEL_VMMU,
+ "Update CR3 value = %lx, mfn = %lx",
v->arch.hvm_vmx.cpu_cr3, mfn);
#endif
}
{
mfn_t mmfn = shadow_make_monitor_table(v);
v->arch.monitor_table = pagetable_from_mfn(mmfn);
- }
+ make_cr3(v, mfn_x(mmfn));
+ hvm_update_host_cr3(v);
+ }
if ( v->arch.shadow.mode != old_mode )
{
if ( is_hvm_domain(d) )
hvm_store_cpu_guest_regs(v, regs, NULL);
- SHADOW_PRINTK("emulate: eip=%#lx\n", regs->eip);
+ SHADOW_PRINTK("emulate: eip=%#lx\n", (unsigned long)regs->eip);
emul_ops = shadow_init_emulation(&emul_ctxt, regs);
ASSERT(is_hvm_domain(d));
#if SHADOW_PAGING_LEVELS == 3
/* 2-on-3 or 3-on-3: Use the PAE shadow l3 table we just fabricated */
- v->arch.hvm_vcpu.hw_cr3 = virt_to_maddr(&v->arch.shadow.l3table);
+ hvm_update_guest_cr3(v, virt_to_maddr(&v->arch.shadow.l3table));
#else
/* 2-on-2 or 4-on-4: Just use the shadow top-level directly */
- v->arch.hvm_vcpu.hw_cr3 = pagetable_get_paddr(v->arch.shadow_table[0]);
+ hvm_update_guest_cr3(v, pagetable_get_paddr(v->arch.shadow_table[0]));
#endif
}
*/
void (*update_host_cr3)(struct vcpu *v);
+ /*
+ * Called to inform HVM layer that a guest cr3 has changed
+ */
+ void (*update_guest_cr3)(struct vcpu *v);
+
/*
* Update specifics of the guest state:
* 1) TS bit in guest cr0
hvm_funcs.update_host_cr3(v);
}
+void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3);
+
void hvm_hypercall_page_initialise(struct domain *d,
void *hypercall_page);